bitkeeper revision 1.1159.1.471 (41ab4844yEdQ7zMWVGeDP2F7X0FEQQ)
author cl349@arcadians.cl.cam.ac.uk <cl349@arcadians.cl.cam.ac.uk>
Mon, 29 Nov 2004 16:03:16 +0000 (16:03 +0000)
committer cl349@arcadians.cl.cam.ac.uk <cl349@arcadians.cl.cam.ac.uk>
Mon, 29 Nov 2004 16:03:16 +0000 (16:03 +0000)
system.h:
  Make disabling/enabling interrupts preemption safe.
  Also g/c __save_and_sti.
entry.S:
  Make disabling/enabling interrupts preemption safe.

linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S
linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/system.h

index 7b56338e7ded9dcdf0af2d42e80aa5d751dd52e8..510714d237688bbfdae70d7c5063334d3a4dc5b6 100644 (file)
@@ -83,19 +83,38 @@ VM_MASK             = 0x00020000
 #define sizeof_vcpu_shift              3
 
 #ifdef CONFIG_SMP
-#define XEN_GET_VCPU_INFO(reg) movl TI_cpu(%ebp),reg                   ; \
+#define XEN_GET_VCPU_INFO(reg)
+#define preempt_disable(reg)   incl TI_preempt_count(reg)
+#define preempt_enable(reg)    decl TI_preempt_count(reg)
+#define XEN_LOCK_VCPU_INFO_SMP(reg) preempt_disable(%ebp)              ; \
+                               movl TI_cpu(%ebp),reg                   ; \
                                shl  $sizeof_vcpu_shift,reg             ; \
                                addl HYPERVISOR_shared_info,reg
-#define XEN_GET_VCPU_INFO_IF_SMP(reg) XEN_GET_VCPU_INFO(reg)
-#define GET_THREAD_INFO_IF_SMP(reg) GET_THREAD_INFO(reg)
+#define XEN_UNLOCK_VCPU_INFO_SMP(reg) preempt_enable(%ebp)
+#define XEN_UNLOCK_VCPU_INFO_SMP_fixup .byte 0x00,0x00,0x00
+#define XEN_BLOCK_EVENTS(reg)  XEN_LOCK_VCPU_INFO_SMP(reg)             ; \
+                               movb $1,evtchn_upcall_mask(reg)         ; \
+                               XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#define XEN_UNBLOCK_EVENTS(reg)        XEN_LOCK_VCPU_INFO_SMP(reg)             ; \
+                               movb $0,evtchn_upcall_mask(reg)         ; \
+                               XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) GET_THREAD_INFO(%ebp)                ; \
+                               XEN_LOCK_VCPU_INFO_SMP(reg)             ; \
+                               movb evtchn_upcall_mask(reg), tmp       ; \
+                               movb tmp, off(%esp)                     ; \
+                               XEN_UNLOCK_VCPU_INFO_SMP(reg)
 #else
 #define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
-#define XEN_GET_VCPU_INFO_IF_SMP(reg)
-#define GET_THREAD_INFO_IF_SMP(reg)
-#endif
-
+#define XEN_LOCK_VCPU_INFO_SMP(reg)
+#define XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#define XEN_UNLOCK_VCPU_INFO_SMP_fixup
 #define XEN_BLOCK_EVENTS(reg)  movb $1,evtchn_upcall_mask(reg)
 #define XEN_UNBLOCK_EVENTS(reg)        movb $0,evtchn_upcall_mask(reg)
+#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) \
+       movb evtchn_upcall_mask(reg), tmp; \
+       movb tmp, off(%esp)
+#endif
+
 #define XEN_TEST_PENDING(reg)  testb $0xFF,evtchn_upcall_pending(reg)
 
 #ifdef CONFIG_PREEMPT
@@ -118,14 +137,12 @@ VM_MASK           = 0x00020000
        pushl %ebx; \
        movl $(__USER_DS), %edx; \
        movl %edx, %ds; \
-       movl %edx, %es; \
-       GET_THREAD_INFO_IF_SMP(%ebp); 
+       movl %edx, %es;
 
 #define SAVE_ALL \
        SAVE_ALL_NO_EVENTMASK; \
        XEN_GET_VCPU_INFO(%esi); \
-       movb evtchn_upcall_mask(%esi), %dl; \
-       movb %dl, EVENT_MASK(%esp)
+       XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK)
 
 #define RESTORE_INT_REGS \
        popl %ebx;      \
@@ -222,7 +239,6 @@ ENTRY(ret_from_fork)
        # userspace resumption stub bypassing syscall exit tracing
        ALIGN
 ret_from_exception:
-       XEN_GET_VCPU_INFO_IF_SMP(%esi)
        preempt_stop
 ret_from_intr:
        GET_THREAD_INFO(%ebp)
@@ -256,7 +272,6 @@ need_resched:
        XEN_UNBLOCK_EVENTS(%esi)
        call schedule
        movl $0,TI_preempt_count(%ebp)
-       XEN_GET_VCPU_INFO_IF_SMP(%esi)
        XEN_BLOCK_EVENTS(%esi)
        jmp need_resched
 #endif
@@ -322,7 +337,6 @@ syscall_call:
        call *sys_call_table(,%eax,4)
        movl %eax,EAX(%esp)             # store the return value
 syscall_exit:
-       XEN_GET_VCPU_INFO_IF_SMP(%esi)
        XEN_BLOCK_EVENTS(%esi)          # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
@@ -334,9 +348,11 @@ restore_all:
        jnz resume_vm86
        movb EVENT_MASK(%esp), %al
        notb %al                        # %al == ~saved_mask
+       XEN_LOCK_VCPU_INFO_SMP(%esi)
        andb evtchn_upcall_mask(%esi),%al
        andb $1,%al                     # %al == mask & ~saved_mask
        jnz restore_all_enable_events   #     != 0 => reenable event delivery
+       XEN_UNLOCK_VCPU_INFO_SMP(%esi)
        RESTORE_ALL
 
 resume_vm86:
@@ -354,7 +370,6 @@ work_pending:
        jz work_notifysig
 work_resched:
        call schedule
-       XEN_GET_VCPU_INFO_IF_SMP(%esi)
        XEN_BLOCK_EVENTS(%esi)          # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
@@ -400,7 +415,6 @@ syscall_trace_entry:
        # perform syscall exit tracing
        ALIGN
 syscall_exit_work:
-       XEN_GET_VCPU_INFO_IF_SMP(%esi)
        testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
        jz work_pending
        XEN_UNBLOCK_EVENTS(%esi)        # could let do_syscall_trace() call
@@ -488,10 +502,8 @@ error_code:
        movl $(__USER_DS), %edx
        movl %edx, %ds
        movl %edx, %es
-       GET_THREAD_INFO_IF_SMP(%ebp) 
        XEN_GET_VCPU_INFO(%esi)
-       movb evtchn_upcall_mask(%esi), %dl
-       movb %dl, EVENT_MASK+8(%esp)
+       XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK+8)
        call *%edi
        addl $8, %esp
        jmp ret_from_exception
@@ -528,8 +540,10 @@ restore_all_enable_events:
 scrit: /**** START OF CRITICAL REGION ****/
        XEN_TEST_PENDING(%esi)
        jnz  14f                        # process more events if necessary...
+       XEN_UNLOCK_VCPU_INFO_SMP(%esi)
        RESTORE_ALL
 14:    XEN_BLOCK_EVENTS(%esi)
+       XEN_UNLOCK_VCPU_INFO_SMP(%esi)
        jmp  11b
 ecrit:  /**** END OF CRITICAL REGION ****/
 # [How we do the fixup]. We want to merge the current stack frame with the
@@ -560,6 +574,7 @@ critical_region_fixup:
 critical_fixup_table:
        .byte 0x00,0x00,0x00            # testb $0x1,(%esi) = XEN_TEST_PENDING
        .byte 0x00,0x00                 # jnz  14f
+       XEN_UNLOCK_VCPU_INFO_SMP_fixup
        .byte 0x00                      # pop  %ebx
        .byte 0x04                      # pop  %ecx
        .byte 0x08                      # pop  %edx
@@ -572,6 +587,7 @@ critical_fixup_table:
        .byte 0x24,0x24,0x24            # add  $4,%esp
        .byte 0x28                      # iret
        .byte 0x00,0x00,0x00,0x00       # movb $1,1(%esi)
+       XEN_UNLOCK_VCPU_INFO_SMP_fixup
        .byte 0x00,0x00                 # jmp  11b
 
 # Hypervisor uses this for application faults while it executes.
@@ -712,7 +728,6 @@ ENTRY(int3)
        pushl %edx
        call do_int3
        addl $8,%esp
-       XEN_GET_VCPU_INFO_IF_SMP(%esi)
        testl %eax,%eax
        jnz restore_all
        jmp ret_from_exception
@@ -783,10 +798,8 @@ ENTRY(page_fault)
        movl $(__KERNEL_DS),%edx
        movl %edx,%ds
        movl %edx,%es
-       GET_THREAD_INFO_IF_SMP(%ebp) 
        XEN_GET_VCPU_INFO(%esi)
-       movb evtchn_upcall_mask(%esi), %dl
-       movb %dl, EVENT_MASK+12(%esp)
+       XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK+12)
        call do_page_fault
        addl $12,%esp
        jmp ret_from_exception
index 565685b55af9bab9afe343cd8f0f1b618eaa1cff..e9a53797e15ab3e17903529be5141f535485cdc9 100644 (file)
@@ -450,65 +450,62 @@ struct alt_instr {
 
 #define __cli()                                                                \
 do {                                                                   \
-       vcpu_info_t *_vcpu =                                            \
-               &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       vcpu_info_t *_vcpu;                                             \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 1;                                  \
+       preempt_enable_no_resched();                                    \
        barrier();                                                      \
 } while (0)
 
 #define __sti()                                                                \
 do {                                                                   \
-       vcpu_info_t *_vcpu =                                            \
-               &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
                force_evtchn_callback();                                \
+       preempt_enable();                                               \
 } while (0)
 
 #define __save_flags(x)                                                        \
 do {                                                                   \
-       vcpu_info_t *_vcpu =                                            \
-               &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       vcpu_info_t *_vcpu;                                             \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
 } while (0)
 
 #define __restore_flags(x)                                             \
 do {                                                                   \
-       vcpu_info_t *_vcpu =                                            \
-               &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
-       if ( (_vcpu->evtchn_upcall_mask = (x)) == 0 ) {                 \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
                        force_evtchn_callback();                        \
-       }                                                               \
+               preempt_enable();                                       \
+       } else                                                          \
+               preempt_enable_no_resched();                            \
 } while (0)
 
 #define safe_halt()            ((void)0)
 
 #define __save_and_cli(x)                                              \
 do {                                                                   \
-       vcpu_info_t *_vcpu =                                            \
-               &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       vcpu_info_t *_vcpu;                                             \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
+       preempt_enable_no_resched();                                    \
        barrier();                                                      \
 } while (0)
 
-#define __save_and_sti(x)                                              \
-do {                                                                   \
-       vcpu_info_t *_vcpu =                                            \
-               &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
-       barrier();                                                      \
-       (x) = _vcpu->evtchn_upcall_mask;                                \
-       _vcpu->evtchn_upcall_mask = 0;                                  \
-       barrier(); /* unmask then check (avoid races) */                \
-       if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
-               force_evtchn_callback();                                \
-} while (0)
-
 #define local_irq_save(x)      __save_and_cli(x)
 #define local_irq_restore(x)   __restore_flags(x)
 #define local_save_flags(x)    __save_flags(x)